mfn = v->arch.vcpu_info_mfn;
unmap_domain_page_global(v->vcpu_info);
- v->vcpu_info = shared_info_addr(d, vcpu_info[v->vcpu_id]);
+ v->vcpu_info = (void *)&shared_info(d, vcpu_info[v->vcpu_id]);
v->arch.vcpu_info_mfn = INVALID_MFN;
put_page_and_type(mfn_to_page(mfn));
*/
vcpu_info(v, evtchn_upcall_pending) = 1;
for ( i = 0; i < BITS_PER_GUEST_LONG(d); i++ )
- set_bit(i, vcpu_info_addr(v, evtchn_pending_sel));
+ set_bit(i, &vcpu_info(v, evtchn_pending_sel));
/*
* Only bother to update time for the current vcpu. If we're
struct hvm_function_table hvm_funcs __read_mostly;
/* I/O permission bitmap is globally shared by all HVM guests. */
-char __attribute__ ((__section__ (".bss.page_aligned")))
- hvm_io_bitmap[3*PAGE_SIZE];
+unsigned long __attribute__ ((__section__ (".bss.page_aligned")))
+ hvm_io_bitmap[3*PAGE_SIZE/BYTES_PER_LONG];
void hvm_enable(struct hvm_function_table *fns)
{
void svm_disable_intercept_for_msr(struct vcpu *v, u32 msr)
{
- char *msr_bitmap = v->arch.hvm_svm.msrpm;
+ unsigned long *msr_bitmap = v->arch.hvm_svm.msrpm;
/*
* See AMD64 Programmers Manual, Vol 2, Section 15.10 (MSR-Bitmap Address).
*/
if ( msr <= 0x1fff )
{
- __clear_bit(msr*2, msr_bitmap + 0x000);
- __clear_bit(msr*2+1, msr_bitmap + 0x000);
+ __clear_bit(msr*2, msr_bitmap + 0x000/BYTES_PER_LONG);
+ __clear_bit(msr*2+1, msr_bitmap + 0x000/BYTES_PER_LONG);
}
else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
{
msr &= 0x1fff;
- __clear_bit(msr*2, msr_bitmap + 0x800);
- __clear_bit(msr*2+1, msr_bitmap + 0x800);
+ __clear_bit(msr*2, msr_bitmap + 0x800/BYTES_PER_LONG);
+ __clear_bit(msr*2+1, msr_bitmap + 0x800/BYTES_PER_LONG);
}
    else if ( (msr >= 0xc0010000) && (msr <= 0xc0011fff) )
{
msr &= 0x1fff;
- __clear_bit(msr*2, msr_bitmap + 0x1000);
- __clear_bit(msr*2+1, msr_bitmap + 0x1000);
+ __clear_bit(msr*2, msr_bitmap + 0x1000/BYTES_PER_LONG);
+ __clear_bit(msr*2+1, msr_bitmap + 0x1000/BYTES_PER_LONG);
}
}
*/
#define VEC_POS(v) ((v)%32)
-#define REG_POS(v) (((v)/32)* 0x10)
-#define vlapic_test_and_set_vector(vec, bitmap) \
- test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec))
-#define vlapic_test_and_clear_vector(vec, bitmap) \
- test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec))
-#define vlapic_set_vector(vec, bitmap) \
- set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec))
-#define vlapic_clear_vector(vec, bitmap) \
- clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec))
+#define REG_POS(v) (((v)/32) * 0x10)
+#define vlapic_test_and_set_vector(vec, bitmap) \
+ test_and_set_bit(VEC_POS(vec), \
+ (unsigned long *)((bitmap) + REG_POS(vec)))
+#define vlapic_test_and_clear_vector(vec, bitmap) \
+ test_and_clear_bit(VEC_POS(vec), \
+ (unsigned long *)((bitmap) + REG_POS(vec)))
+#define vlapic_set_vector(vec, bitmap) \
+ set_bit(VEC_POS(vec), (unsigned long *)((bitmap) + REG_POS(vec)))
+#define vlapic_clear_vector(vec, bitmap) \
+ clear_bit(VEC_POS(vec), (unsigned long *)((bitmap) + REG_POS(vec)))
static int vlapic_find_highest_vector(void *bitmap)
{
static int vlapic_test_and_set_irr(int vector, struct vlapic *vlapic)
{
- return vlapic_test_and_set_vector(vector, &vlapic->regs->data[APIC_IRR]);
+ return vlapic_test_and_set_vector(
+ vector, (unsigned long *)&vlapic->regs->data[APIC_IRR]);
}
static void vlapic_clear_irr(int vector, struct vlapic *vlapic)
{
- vlapic_clear_vector(vector, &vlapic->regs->data[APIC_IRR]);
+ vlapic_clear_vector(
+ vector, (unsigned long *)&vlapic->regs->data[APIC_IRR]);
}
static int vlapic_find_highest_irr(struct vlapic *vlapic)
void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr)
{
- char *msr_bitmap = v->arch.hvm_vmx.msr_bitmap;
+ unsigned long *msr_bitmap = v->arch.hvm_vmx.msr_bitmap;
/* VMX MSR bitmap supported? */
if ( msr_bitmap == NULL )
*/
if ( msr <= 0x1fff )
{
- __clear_bit(msr, msr_bitmap + 0x000); /* read-low */
- __clear_bit(msr, msr_bitmap + 0x800); /* write-low */
+ __clear_bit(msr, msr_bitmap + 0x000/BYTES_PER_LONG); /* read-low */
+ __clear_bit(msr, msr_bitmap + 0x800/BYTES_PER_LONG); /* write-low */
}
else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
{
msr &= 0x1fff;
- __clear_bit(msr, msr_bitmap + 0x400); /* read-high */
- __clear_bit(msr, msr_bitmap + 0xc00); /* write-high */
+ __clear_bit(msr, msr_bitmap + 0x400/BYTES_PER_LONG); /* read-high */
+ __clear_bit(msr, msr_bitmap + 0xc00/BYTES_PER_LONG); /* write-high */
}
}
/* MSR access bitmap. */
if ( cpu_has_vmx_msr_bitmap )
{
- char *msr_bitmap = alloc_xenheap_page();
+ unsigned long *msr_bitmap = alloc_xenheap_page();
if ( msr_bitmap == NULL )
return -ENOMEM;
return 0;
}
-static void core2_vpmu_set_msr_bitmap(char *msr_bitmap)
+static void core2_vpmu_set_msr_bitmap(unsigned long *msr_bitmap)
{
int i;
for ( i = 0; i < core2_counters.num; i++ )
{
clear_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap);
- clear_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap+0x800);
+ clear_bit(msraddr_to_bitpos(core2_counters.msr[i]),
+ msr_bitmap + 0x800/BYTES_PER_LONG);
}
for ( i = 0; i < core2_get_pmc_count(); i++ )
{
clear_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i), msr_bitmap);
- clear_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i), msr_bitmap+0x800);
+ clear_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i),
+ msr_bitmap + 0x800/BYTES_PER_LONG);
}
/* Allow Read PMU Non-global Controls Directly. */
clear_bit(msraddr_to_bitpos(MSR_P6_EVNTSEL0+i), msr_bitmap);
}
-static void core2_vpmu_unset_msr_bitmap(char *msr_bitmap)
+static void core2_vpmu_unset_msr_bitmap(unsigned long *msr_bitmap)
{
int i;
for ( i = 0; i < core2_counters.num; i++ )
{
set_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap);
- set_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap+0x800);
+ set_bit(msraddr_to_bitpos(core2_counters.msr[i]),
+ msr_bitmap + 0x800/BYTES_PER_LONG);
}
for ( i = 0; i < core2_get_pmc_count(); i++ )
{
set_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i), msr_bitmap);
- set_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i), msr_bitmap+0x800);
+ set_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i),
+ msr_bitmap + 0x800/BYTES_PER_LONG);
}
for ( i = 0; i < core2_ctrls.num; i++ )
set_bit(msraddr_to_bitpos(core2_ctrls.msr[i]), msr_bitmap);
int pirq_guest_unmask(struct domain *d)
{
unsigned int irq;
- shared_info_t *s = d->shared_info;
for ( irq = find_first_bit(d->pirq_mask, NR_IRQS);
irq < NR_IRQS;
irq = find_next_bit(d->pirq_mask, NR_IRQS, irq+1) )
{
- if ( !test_bit(d->pirq_to_evtchn[irq], __shared_info_addr(d, s, evtchn_mask)) )
+ if ( !test_bit(d->pirq_to_evtchn[irq], &shared_info(d, evtchn_mask)) )
__pirq_guest_eoi(d, irq);
}
printk("%u(%c%c%c%c)",
d->domain_id,
(test_bit(d->pirq_to_evtchn[irq],
- shared_info_addr(d, evtchn_pending)) ?
+ &shared_info(d, evtchn_pending)) ?
'P' : '-'),
(test_bit(d->pirq_to_evtchn[irq]/BITS_PER_GUEST_LONG(d),
- vcpu_info_addr(d->vcpu[0], evtchn_pending_sel)) ?
+ &vcpu_info(d->vcpu[0], evtchn_pending_sel)) ?
'S' : '-'),
(test_bit(d->pirq_to_evtchn[irq],
- shared_info_addr(d, evtchn_mask)) ?
+ &shared_info(d, evtchn_mask)) ?
'M' : '-'),
(test_bit(irq, d->pirq_mask) ?
'M' : '-'));
return mfn;
}
-static mfn_t paging_new_log_dirty_leaf(struct domain *d, uint8_t **leaf_p)
+static mfn_t paging_new_log_dirty_leaf(
+ struct domain *d, unsigned long **leaf_p)
{
mfn_t mfn = paging_new_log_dirty_page(d, (void **)leaf_p);
if ( mfn_valid(mfn) )
mfn_t gmfn;
int changed;
mfn_t mfn, *l4, *l3, *l2;
- uint8_t *l1;
+ unsigned long *l1;
int i1, i2, i3, i4;
gmfn = _mfn(guest_mfn);
int rv = 0, clean = 0, peek = 1;
unsigned long pages = 0;
mfn_t *l4, *l3, *l2;
- uint8_t *l1;
+ unsigned long *l1;
int i4, i3, i2;
domain_pause(d);
(pages < sc->pages) && (i2 < LOGDIRTY_NODE_ENTRIES);
i2++ )
{
- static uint8_t zeroes[PAGE_SIZE];
+ static unsigned long zeroes[PAGE_SIZE/BYTES_PER_LONG];
unsigned int bytes = PAGE_SIZE;
l1 = ((l2 && mfn_valid(l2[i2])) ?
map_domain_page(mfn_x(l2[i2])) : zeroes);
if ( likely(peek) )
{
if ( copy_to_guest_offset(sc->dirty_bitmap, pages >> 3,
- l1, bytes) != 0 )
+ (uint8_t *)l1, bytes) != 0 )
{
rv = -EFAULT;
goto out;
{
unsigned long pfn;
mfn_t mfn, *l4, *l3, *l2;
- uint8_t *l1;
+ unsigned long *l1;
int rv;
ASSERT(shadow_mode_log_dirty(d));
if ( !is_idle_domain(d) )
{
set_bit(_VPF_down, &v->pause_flags);
- v->vcpu_info = shared_info_addr(d, vcpu_info[vcpu_id]);
+ v->vcpu_info = (void *)&shared_info(d, vcpu_info[vcpu_id]);
}
if ( sched_init_vcpu(v, cpu_id) != 0 )
void evtchn_set_pending(struct vcpu *v, int port)
{
struct domain *d = v->domain;
- shared_info_t *s = d->shared_info;
/*
* The following bit operations must happen in strict order.
* others may require explicit memory barriers.
*/
- if ( test_and_set_bit(port, __shared_info_addr(d, s, evtchn_pending)) )
+ if ( test_and_set_bit(port, &shared_info(d, evtchn_pending)) )
return;
- if ( !test_bit (port, __shared_info_addr(d, s, evtchn_mask)) &&
+ if ( !test_bit (port, &shared_info(d, evtchn_mask)) &&
!test_and_set_bit(port / BITS_PER_GUEST_LONG(d),
- vcpu_info_addr(v, evtchn_pending_sel)) )
+ &vcpu_info(v, evtchn_pending_sel)) )
{
vcpu_mark_events_pending(v);
}
static long evtchn_unmask(evtchn_unmask_t *unmask)
{
struct domain *d = current->domain;
- shared_info_t *s = d->shared_info;
int port = unmask->port;
struct vcpu *v;
* These operations must happen in strict order. Based on
* include/xen/event.h:evtchn_set_pending().
*/
- if ( test_and_clear_bit(port, __shared_info_addr(d, s, evtchn_mask)) &&
- test_bit (port, __shared_info_addr(d, s, evtchn_pending)) &&
+ if ( test_and_clear_bit(port, &shared_info(d, evtchn_mask)) &&
+ test_bit (port, &shared_info(d, evtchn_pending)) &&
!test_and_set_bit (port / BITS_PER_GUEST_LONG(d),
- vcpu_info_addr(v, evtchn_pending_sel)) )
+ &vcpu_info(v, evtchn_pending_sel)) )
{
vcpu_mark_events_pending(v);
}
printk(" Notifying guest (virq %d, port %d, stat %d/%d/%d)\n",
VIRQ_DEBUG, v->virq_to_evtchn[VIRQ_DEBUG],
test_bit(v->virq_to_evtchn[VIRQ_DEBUG],
- shared_info_addr(d, evtchn_pending)),
+ &shared_info(d, evtchn_pending)),
test_bit(v->virq_to_evtchn[VIRQ_DEBUG],
- shared_info_addr(d, evtchn_mask)),
+ &shared_info(d, evtchn_mask)),
test_bit(v->virq_to_evtchn[VIRQ_DEBUG] /
BITS_PER_GUEST_LONG(d),
- vcpu_info_addr(v, evtchn_pending_sel)));
+ &vcpu_info(v, evtchn_pending_sel)));
send_guest_vcpu_virq(v, VIRQ_DEBUG);
}
}
goto out;
rc = 0;
- if ( test_bit(port, shared_info_addr(d, evtchn_pending)) )
+ if ( test_bit(port, &shared_info(d, evtchn_pending)) )
goto out;
}
#define domain_iommu_domid(d) ((d)->arch.hvm_domain.hvm_iommu.iommu_domid)
static spinlock_t domid_bitmap_lock; /* protect domain id bitmap */
-static int domid_bitmap_size; /* domain id bitmap size in bit */
-static void *domid_bitmap; /* iommu domain id bitmap */
+static int domid_bitmap_size; /* domain id bitmap size in bits */
+static unsigned long *domid_bitmap; /* iommu domain id bitmap */
#define DID_FIELD_WIDTH 16
#define DID_HIGH_OFFSET 8
/* Allocate domain id bitmap, and set bit 0 as reserved */
domid_bitmap_size = cap_ndoms(iommu->cap);
- domid_bitmap = xmalloc_bytes(domid_bitmap_size / 8);
+ domid_bitmap = xmalloc_array(unsigned long,
+ BITS_TO_LONGS(domid_bitmap_size));
if ( domid_bitmap == NULL )
goto error;
memset(domid_bitmap, 0, domid_bitmap_size / 8);
((font->width + 7) >> 3));
for ( b = font->width; b--; )
{
- pixel = test_bit(b, bits) ? pixel_on : 0;
+ pixel = (*bits & (1u<<b)) ? pixel_on : 0;
memcpy(ptr, &pixel, bpp);
ptr += bpp;
}
#define ADDR (*(volatile long *) addr)
#define CONST_ADDR (*(const volatile long *) addr)
+extern void __bitop_bad_size(void);
+#define bitop_bad_size(addr) (min(sizeof(*(addr)), __alignof__(*(addr))) < 4)
+
/**
* set_bit - Atomically set a bit in memory
* @nr: the bit to set
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static __inline__ void set_bit(int nr, volatile void * addr)
+static inline void set_bit(int nr, volatile void *addr)
{
- __asm__ __volatile__( LOCK_PREFIX
- "btsl %1,%0"
- :"=m" (ADDR)
- :"dIr" (nr), "m" (ADDR) : "memory");
+ asm volatile (
+ LOCK_PREFIX
+ "btsl %1,%0"
+ : "=m" (ADDR)
+ : "Ir" (nr), "m" (ADDR) : "memory");
}
+#define set_bit(nr, addr) ({ \
+ if ( bitop_bad_size(addr) ) __bitop_bad_size(); \
+ set_bit(nr, addr); \
+})
/**
* __set_bit - Set a bit in memory
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
-static __inline__ void __set_bit(int nr, volatile void * addr)
+static inline void __set_bit(int nr, volatile void *addr)
{
- __asm__(
- "btsl %1,%0"
- :"=m" (ADDR)
- :"dIr" (nr), "m" (ADDR) : "memory");
+ asm volatile (
+ "btsl %1,%0"
+ : "=m" (ADDR)
+ : "Ir" (nr), "m" (ADDR) : "memory");
}
+#define __set_bit(nr, addr) ({ \
+ if ( bitop_bad_size(addr) ) __bitop_bad_size(); \
+ __set_bit(nr, addr); \
+})
/**
* clear_bit - Clears a bit in memory
* you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
* in order to ensure changes are visible on other processors.
*/
-static __inline__ void clear_bit(int nr, volatile void * addr)
+static inline void clear_bit(int nr, volatile void *addr)
{
- __asm__ __volatile__( LOCK_PREFIX
- "btrl %1,%0"
- :"=m" (ADDR)
- :"dIr" (nr), "m" (ADDR) : "memory");
+ asm volatile (
+ LOCK_PREFIX
+ "btrl %1,%0"
+ : "=m" (ADDR)
+ : "Ir" (nr), "m" (ADDR) : "memory");
}
+#define clear_bit(nr, addr) ({ \
+ if ( bitop_bad_size(addr) ) __bitop_bad_size(); \
+ clear_bit(nr, addr); \
+})
/**
* __clear_bit - Clears a bit in memory
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
-static __inline__ void __clear_bit(int nr, volatile void * addr)
+static inline void __clear_bit(int nr, volatile void *addr)
{
- __asm__(
- "btrl %1,%0"
- :"=m" (ADDR)
- :"dIr" (nr), "m" (ADDR) : "memory");
+ asm volatile (
+ "btrl %1,%0"
+ : "=m" (ADDR)
+ : "Ir" (nr), "m" (ADDR) : "memory");
}
+#define __clear_bit(nr, addr) ({ \
+ if ( bitop_bad_size(addr) ) __bitop_bad_size(); \
+ __clear_bit(nr, addr); \
+})
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
+#define smp_mb__before_clear_bit() barrier()
+#define smp_mb__after_clear_bit() barrier()
/**
* __change_bit - Toggle a bit in memory
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
-static __inline__ void __change_bit(int nr, volatile void * addr)
+static inline void __change_bit(int nr, volatile void *addr)
{
- __asm__ __volatile__(
- "btcl %1,%0"
- :"=m" (ADDR)
- :"dIr" (nr), "m" (ADDR) : "memory");
+ asm volatile (
+ "btcl %1,%0"
+ : "=m" (ADDR)
+ : "Ir" (nr), "m" (ADDR) : "memory");
}
+#define __change_bit(nr, addr) ({ \
+ if ( bitop_bad_size(addr) ) __bitop_bad_size(); \
+ __change_bit(nr, addr); \
+})
/**
* change_bit - Toggle a bit in memory
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static __inline__ void change_bit(int nr, volatile void * addr)
+static inline void change_bit(int nr, volatile void *addr)
{
- __asm__ __volatile__( LOCK_PREFIX
- "btcl %1,%0"
- :"=m" (ADDR)
- :"dIr" (nr), "m" (ADDR) : "memory");
+ asm volatile (
+ LOCK_PREFIX
+ "btcl %1,%0"
+ : "=m" (ADDR)
+ : "Ir" (nr), "m" (ADDR) : "memory");
}
+#define change_bit(nr, addr) ({ \
+ if ( bitop_bad_size(addr) ) __bitop_bad_size(); \
+ change_bit(nr, addr); \
+})
/**
* test_and_set_bit - Set a bit and return its old value
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static __inline__ int test_and_set_bit(int nr, volatile void * addr)
+static inline int test_and_set_bit(int nr, volatile void *addr)
{
- int oldbit;
-
- __asm__ __volatile__( LOCK_PREFIX
- "btsl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
- :"dIr" (nr), "m" (ADDR) : "memory");
- return oldbit;
+ int oldbit;
+
+ asm volatile (
+ LOCK_PREFIX
+ "btsl %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit), "=m" (ADDR)
+ : "Ir" (nr), "m" (ADDR) : "memory");
+ return oldbit;
}
+#define test_and_set_bit(nr, addr) ({ \
+ if ( bitop_bad_size(addr) ) __bitop_bad_size(); \
+ test_and_set_bit(nr, addr); \
+})
/**
* __test_and_set_bit - Set a bit and return its old value
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
-static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
+static inline int __test_and_set_bit(int nr, volatile void *addr)
{
- int oldbit;
+ int oldbit;
- __asm__ __volatile__(
- "btsl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
- :"dIr" (nr), "m" (ADDR) : "memory");
- return oldbit;
+ asm volatile (
+ "btsl %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit), "=m" (ADDR)
+ : "Ir" (nr), "m" (ADDR) : "memory");
+ return oldbit;
}
+#define __test_and_set_bit(nr, addr) ({ \
+ if ( bitop_bad_size(addr) ) __bitop_bad_size(); \
+ __test_and_set_bit(nr, addr); \
+})
/**
* test_and_clear_bit - Clear a bit and return its old value
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
+static inline int test_and_clear_bit(int nr, volatile void *addr)
{
- int oldbit;
-
- __asm__ __volatile__( LOCK_PREFIX
- "btrl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
- :"dIr" (nr), "m" (ADDR) : "memory");
- return oldbit;
+ int oldbit;
+
+ asm volatile (
+ LOCK_PREFIX
+ "btrl %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit), "=m" (ADDR)
+ : "Ir" (nr), "m" (ADDR) : "memory");
+ return oldbit;
}
+#define test_and_clear_bit(nr, addr) ({ \
+ if ( bitop_bad_size(addr) ) __bitop_bad_size(); \
+ test_and_clear_bit(nr, addr); \
+})
/**
* __test_and_clear_bit - Clear a bit and return its old value
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
-static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
+static inline int __test_and_clear_bit(int nr, volatile void *addr)
{
- int oldbit;
+ int oldbit;
- __asm__ __volatile__(
- "btrl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
- :"dIr" (nr), "m" (ADDR) : "memory");
- return oldbit;
+ asm volatile (
+ "btrl %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit), "=m" (ADDR)
+ : "Ir" (nr), "m" (ADDR) : "memory");
+ return oldbit;
}
+#define __test_and_clear_bit(nr, addr) ({ \
+ if ( bitop_bad_size(addr) ) __bitop_bad_size(); \
+ __test_and_clear_bit(nr, addr); \
+})
/* WARNING: non atomic and it can be reordered! */
-static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
+static inline int __test_and_change_bit(int nr, volatile void *addr)
{
- int oldbit;
+ int oldbit;
- __asm__ __volatile__(
- "btcl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
- :"dIr" (nr), "m" (ADDR) : "memory");
- return oldbit;
+ asm volatile (
+ "btcl %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit), "=m" (ADDR)
+ : "Ir" (nr), "m" (ADDR) : "memory");
+ return oldbit;
}
+#define __test_and_change_bit(nr, addr) ({ \
+ if ( bitop_bad_size(addr) ) __bitop_bad_size(); \
+ __test_and_change_bit(nr, addr); \
+})
/**
* test_and_change_bit - Change a bit and return its new value
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static __inline__ int test_and_change_bit(int nr, volatile void * addr)
+static inline int test_and_change_bit(int nr, volatile void *addr)
{
- int oldbit;
-
- __asm__ __volatile__( LOCK_PREFIX
- "btcl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
- :"dIr" (nr), "m" (ADDR) : "memory");
- return oldbit;
+ int oldbit;
+
+ asm volatile (
+ LOCK_PREFIX
+ "btcl %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit), "=m" (ADDR)
+ : "Ir" (nr), "m" (ADDR) : "memory");
+ return oldbit;
}
+#define test_and_change_bit(nr, addr) ({ \
+ if ( bitop_bad_size(addr) ) __bitop_bad_size(); \
+ test_and_change_bit(nr, addr); \
+})
-
-static __inline__ int constant_test_bit(int nr, const volatile void * addr)
+static inline int constant_test_bit(int nr, const volatile void *addr)
{
- return ((1U << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
+ return ((1U << (nr & 31)) &
+ (((const volatile unsigned int *)addr)[nr >> 5])) != 0;
}
-static __inline__ int variable_test_bit(int nr, const volatile void * addr)
+static inline int variable_test_bit(int nr, const volatile void *addr)
{
- int oldbit;
+ int oldbit;
- __asm__ __volatile__(
- "btl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit)
- :"m" (CONST_ADDR),"dIr" (nr));
- return oldbit;
+ asm volatile (
+ "btl %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit)
+ : "m" (CONST_ADDR), "Ir" (nr) : "memory" );
+ return oldbit;
}
-#define test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- constant_test_bit((nr),(addr)) : \
- variable_test_bit((nr),(addr)))
+#define test_bit(nr, addr) ({ \
+ if ( bitop_bad_size(addr) ) __bitop_bad_size(); \
+ (__builtin_constant_p(nr) ? \
+ constant_test_bit((nr),(addr)) : \
+ variable_test_bit((nr),(addr))); \
+})
extern unsigned int __find_first_bit(
const unsigned long *addr, unsigned int size);
/* return index of first bit set in val or BITS_PER_LONG when no bit is set */
static inline unsigned int __scanbit(unsigned long val)
{
- __asm__ ( "bsf %1,%0" : "=r" (val) : "r" (val), "0" (BITS_PER_LONG) );
- return (unsigned int)val;
+ asm ( "bsf %1,%0" : "=r" (val) : "r" (val), "0" (BITS_PER_LONG) );
+ return (unsigned int)val;
}
/**
* Returns the bit-number of the first set bit. If no bits are set then the
* result is undefined.
*/
-static __inline__ unsigned int find_first_set_bit(unsigned long word)
+static inline unsigned int find_first_set_bit(unsigned long word)
{
- __asm__ ( "bsf %1,%0" : "=r" (word) : "r" (word) );
- return (unsigned int)word;
+ asm ( "bsf %1,%0" : "=r" (word) : "r" (word) );
+ return (unsigned int)word;
}
/**
*/
static inline unsigned long ffz(unsigned long word)
{
- __asm__("bsf %1,%0"
- :"=r" (word)
- :"r" (~word));
- return word;
+ asm ( "bsf %1,%0"
+ :"=r" (word)
+ :"r" (~word));
+ return word;
}
/**
*/
static inline int ffs(unsigned long x)
{
- long r;
+ long r;
- __asm__("bsf %1,%0\n\t"
- "jnz 1f\n\t"
- "mov $-1,%0\n"
- "1:" : "=r" (r) : "rm" (x));
- return (int)r+1;
+ asm ( "bsf %1,%0\n\t"
+ "jnz 1f\n\t"
+ "mov $-1,%0\n"
+ "1:" : "=r" (r) : "rm" (x));
+ return (int)r+1;
}
/**
*/
static inline int fls(unsigned long x)
{
- long r;
+ long r;
- __asm__("bsr %1,%0\n\t"
- "jnz 1f\n\t"
- "mov $-1,%0\n"
- "1:" : "=r" (r) : "rm" (x));
- return (int)r+1;
+ asm ( "bsr %1,%0\n\t"
+ "jnz 1f\n\t"
+ "mov $-1,%0\n"
+ "1:" : "=r" (r) : "rm" (x));
+ return (int)r+1;
}
/**
static inline void vcpu_mark_events_pending(struct vcpu *v)
{
- if ( test_and_set_bit(0, &vcpu_info(v, evtchn_upcall_pending)) )
+ int already_pending = test_and_set_bit(
+ 0, (unsigned long *)&vcpu_info(v, evtchn_upcall_pending));
+
+ if ( already_pending )
return;
if ( is_hvm_vcpu(v) )
static inline void gnttab_clear_flag(unsigned long nr, uint16_t *addr)
{
- clear_bit(nr, addr);
+ clear_bit(nr, (unsigned long *)addr);
}
/* Foreign mappings of HHVM-guest pages do not modify the type count. */
#define HVM_DBG_LOG(level, _f, _a...)
#endif
-extern char hvm_io_bitmap[];
+extern unsigned long hvm_io_bitmap[];
void hvm_enable(struct hvm_function_table *);
struct vmcb_struct *vmcb;
u64 vmcb_pa;
u64 asid_generation; /* ASID tracking, moved here for cache locality. */
- char *msrpm;
+ unsigned long *msrpm;
int launch_core;
bool_t vmcb_in_sync; /* VMCB sync'ed with VMSAVE? */
};
unsigned long cstar;
#endif
- char *msr_bitmap;
+ unsigned long *msr_bitmap;
unsigned int msr_count;
struct vmx_msr_entry *msr_area;
unsigned int host_msr_count;
#ifdef CONFIG_COMPAT
-#define nmi_reason(d) (!has_32bit_shinfo(d) ? \
- (void *)&(d)->shared_info->native.arch.nmi_reason : \
- (void *)&(d)->shared_info->compat.arch.nmi_reason)
+#define nmi_reason(d) (!has_32bit_shinfo(d) ? \
+ (u32 *)&(d)->shared_info->native.arch.nmi_reason : \
+ (u32 *)&(d)->shared_info->compat.arch.nmi_reason)
#define GET_SET_SHARED(type, field) \
static inline type arch_get_##field(const struct domain *d) \
#else
-#define nmi_reason(d) ((void *)&(d)->shared_info->arch.nmi_reason)
+#define nmi_reason(d) ((u32 *)&(d)->shared_info->arch.nmi_reason)
#define GET_SET_SHARED(type, field) \
static inline type arch_get_##field(const struct domain *d) \
struct compat_shared_info compat;
} shared_info_t;
-#define __shared_info(d, s, field) (*(!has_32bit_shinfo(d) ? \
- &(s)->native.field : \
- &(s)->compat.field))
-#define __shared_info_addr(d, s, field) (!has_32bit_shinfo(d) ? \
- (void *)&(s)->native.field : \
- (void *)&(s)->compat.field)
-
+/*
+ * Compat field is never larger than native field, so cast to that as it
+ * is the largest memory range it is safe for the caller to modify without
+ * further discrimination between compat and native cases.
+ */
+#define __shared_info(d, s, field) \
+ (*(!has_32bit_shinfo(d) ? \
+ (typeof(&(s)->compat.field))&(s)->native.field : \
+ (typeof(&(s)->compat.field))&(s)->compat.field))
#define shared_info(d, field) \
__shared_info(d, (d)->shared_info, field)
-#define shared_info_addr(d, field) \
- __shared_info_addr(d, (d)->shared_info, field)
typedef union {
struct vcpu_info native;
struct compat_vcpu_info compat;
} vcpu_info_t;
-#define vcpu_info(v, field) (*(!has_32bit_shinfo((v)->domain) ? \
- &(v)->vcpu_info->native.field : \
- &(v)->vcpu_info->compat.field))
-#define vcpu_info_addr(v, field) (!has_32bit_shinfo((v)->domain) ? \
- (void *)&(v)->vcpu_info->native.field : \
- (void *)&(v)->vcpu_info->compat.field)
+/* As above, cast to compat field type. */
+#define vcpu_info(v, field) \
+ (*(!has_32bit_shinfo((v)->domain) ? \
+ (typeof(&(v)->vcpu_info->compat.field))&(v)->vcpu_info->native.field : \
+ (typeof(&(v)->vcpu_info->compat.field))&(v)->vcpu_info->compat.field))
#else
typedef struct shared_info shared_info_t;
-
-#define __shared_info(d, s, field) ((s)->field)
-#define __shared_info_addr(d, s, field) ((void *)&(s)->field)
-
#define shared_info(d, field) ((d)->shared_info->field)
-#define shared_info_addr(d, field) ((void *)&(d)->shared_info->field)
typedef struct vcpu_info vcpu_info_t;
-
#define vcpu_info(v, field) ((v)->vcpu_info->field)
-#define vcpu_info_addr(v, field) ((void *)&(v)->vcpu_info->field)
#endif